In [1]:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
First, we create some data. In a real example, this would be loaded from a file.
In this case, input_data is two values, and output_data is one value (the thing we're trying to predict given the input_data). For the particular data I've generated here, you can't do it given only the current input_data; you can only make an accurate prediction given the previous input_data as well.
In [2]:
# Reproducibility: seed the global RNG so the noisy input is identical on every run.
np.random.seed(42)

t = np.arange(50)*0.05  # 50 time steps, spaced 0.05 apart

# Input: two identical square waves (sign of a sine) with small Gaussian noise added.
input_data = np.sign(np.array([np.sin(2*np.pi*t),np.sin(2*np.pi*t)]).T).astype(float)
input_data += np.random.normal(size=input_data.shape)*0.1

# Target: a square wave at double the frequency, phase-shifted by pi,
# rescaled from {-1, 1} to {0, 1} so it can serve as a binary label.
output_data = (np.sign(np.sin(2*np.pi*t*2+np.pi)).astype(float)+1)/2

# Show shapes and a small sample instead of dumping the full arrays.
print('Input Data shape', input_data.shape, 'first rows:')
print(input_data[:5])
print('Output Data shape', output_data.shape, 'first values:', output_data[:5])
Let's plot that data, just to make it clearer
In [3]:
# Plot inputs and target together so the relationship is visible at a glance.
# Explicit figure/axes interface (rather than the pyplot state machine) keeps
# the two panels unambiguous, and axis labels let the figure stand alone.
fig, (ax_in, ax_out) = plt.subplots(2, 1)
ax_in.plot(input_data)
ax_in.set(title='input data', xlabel='time step')
ax_out.plot(output_data)
ax_out.set(title='output data', xlabel='time step')
plt.tight_layout()
plt.show()
Now we need to make our network and train it.
In [11]:
# Training hyper-parameters.
n_epochs = 4000    # training iterations over the full dataset
n_units = 200      # hidden units in the LSTM cell
n_classes = 1      # dimensionality of the prediction target
n_features = 2     # dimensionality of each input sample
Now we create our network (the structure is adapted from a standard LSTM tutorial). The input features are split into a sequence, fed through an LSTM cell, and the final LSTM output is passed through a dense (linear) layer to produce a single logit per sample.
In [5]:
# Build the TF1 computation graph.
# X holds a batch of input samples; shape [None, n_features] lets the batch size vary.
X = tf.placeholder('float',[None,n_features])
# Y holds the targets; its shape is left unspecified.
Y = tf.placeholder('float')
# Randomly-initialized dense read-out layer applied to the LSTM's final output.
weights = tf.Variable(tf.random_normal([n_units, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))
# Split the feature axis into a list of n_features tensors of shape [None, 1];
# static_rnn treats this list as the time sequence, so the LSTM steps over the
# two FEATURES of one sample rather than over consecutive samples.
# NOTE(review): given the stated goal of using the *previous* sample to predict,
# treating the feature axis as the time axis looks questionable — confirm intent.
x = tf.split(X, n_features, 1)
lstm_cell = rnn.BasicLSTMCell(n_units)
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Project the last step's output to a single logit per sample, flattened to 1-D
# so it matches the shape of the 1-D target vector.
output = tf.matmul(outputs[-1], weights) + bias
output = tf.reshape(output, [-1])
# Sigmoid cross-entropy on the raw logits (sigmoid is applied inside the loss),
# averaged over the batch; Adam with default settings minimizes it.
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
Now we train it.
In [6]:
# Train the network inside a TF1 session.
# (Indentation restored here — the notebook export stripped it.)
with tf.Session() as session:
    # initialize the network variables (weights, bias, LSTM internals)
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()
    # now do the training
    for epoch in range(n_epochs):
        # this does one pass through the training data (the whole dataset as one batch)
        _, error = session.run([optimizer, cost], feed_dict={X: input_data, Y: output_data})
        # print a message every 100 epochs
        if epoch % 100 == 0:
            print('Epoch', epoch, 'completed out of', n_epochs, 'error:', error)
    # now compute the output after training: sigmoid squashes the logits into
    # (0, 1) and round snaps them to hard 0/1 class predictions
    pred = tf.round(tf.nn.sigmoid(output)).eval({X: input_data})
In [10]:
# Compare the trained network's predictions against the target signal.
fig = plt.figure()

top = fig.add_subplot(2, 1, 1)
top.set_title('ideal output')
top.plot(output_data)

bottom = fig.add_subplot(2, 1, 2)
bottom.set_title('predicted output')
bottom.plot(pred)

plt.tight_layout()
plt.show()
In [ ]: